from constants import *
from utils import evaluate_model_policy, plot_study, plot_fig
from trainer import get_trained_model
import optuna
from environment import StreetFighterEnv
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack
from actor_critic import A2CCNNPolicy
from feature_extractors import CNNExtractorWithAttention, CNNExtractor
from tuner import Tuner
import os
# Experiment 1: tune A2C hyperparameters on StreetFighter WITHOUT movement capture.
TIMESTEPS = 1000000  # training timesteps budget handed to the Tuner -- presumably per trial; TODO confirm in Tuner
N_TRIALS = 20  # number of Optuna trials to run in the study
PLOTLY_CONFIG = {"staticPlot": True}  # render Plotly figures as static (non-interactive) in the notebook
model = A2C  # the algorithm CLASS (not an instance); Tuner instantiates it per trial
model_dir = 'models/bias'  # where the Tuner saves trial models/checkpoints
env = StreetFighterEnv(capture_movement=False)  # baseline environment: movement features disabled
policy_network = A2CCNNPolicy  # custom actor-critic policy class from actor_critic module
policy_kwargs = dict(
features_extractor_class=CNNExtractorWithAttention,  # CNN feature extractor augmented with attention
features_extractor_kwargs=dict(features_dim=512,),  # size of the extracted feature vector
)
tuner = Tuner(model=model, env=env, policy_network=policy_network, policy_args=policy_kwargs,
timesteps=TIMESTEPS, save_dir=model_dir)
study = tuner.tune_study(n_trials=N_TRIALS, )  # runs the Optuna optimization; returns an optuna.Study
study.best_trial.number, study.best_params  # notebook cell display: (best trial index, its hyperparameters)
[I 2022-04-17 18:13:52,757] A new study created in memory with name: no-name-5402990f-7199-42e1-a370-3127d1bb3252
[I 2022-04-17 18:14:31,311] Trial 0 finished with value: 1000.0 and parameters: {'gamma': 0.9626161263519397, 'learning_rate': 1.1414579541166597e-05, 'gae_lambda': 0.8173291103169752}. Best is trial 0 with value: 1000.0.
[I 2022-04-17 18:15:11,475] Trial 1 finished with value: 0.0 and parameters: {'gamma': 0.9275898545960938, 'learning_rate': 5.2444593463428554e-05, 'gae_lambda': 0.8382994429303777}. Best is trial 0 with value: 1000.0.
(0,
{'gamma': 0.9626161263519397,
'learning_rate': 1.1414579541166597e-05,
'gae_lambda': 0.8173291103169752})
# Render each diagnostic figure from the Optuna study inline in the notebook.
plots = plot_study(study)
for figure in plots:
    figure.show("notebook", config=PLOTLY_CONFIG)
# Experiment 2: identical tuning setup, but WITH movement capture enabled,
# to compare against the baseline study above.
model = A2C  # same algorithm class as experiment 1
model_dir = 'models/bias_with_movement'  # separate save directory so runs don't clobber each other
env = StreetFighterEnv(capture_movement=True)  # only difference vs. experiment 1: movement features enabled
policy_network = A2CCNNPolicy
policy_kwargs = dict(
features_extractor_class=CNNExtractorWithAttention,  # attention-augmented CNN feature extractor
features_extractor_kwargs=dict(features_dim=512,),  # 512-dim feature vector, matching experiment 1
)
tuner = Tuner(model=model, env=env, policy_network=policy_network, policy_args=policy_kwargs,
timesteps=TIMESTEPS, save_dir=model_dir)
study = tuner.tune_study(n_trials=N_TRIALS, )  # runs the Optuna optimization; returns an optuna.Study
study.best_trial.number, study.best_params  # notebook cell display: (best trial index, its hyperparameters)
[I 2022-04-17 18:15:11,805] A new study created in memory with name: no-name-3a47a46d-9399-46b3-b5d2-f7fe39cec9b6
[I 2022-04-17 18:16:08,348] Trial 0 finished with value: 2000.0 and parameters: {'gamma': 0.9853963563622253, 'learning_rate': 1.5078178326383642e-05, 'gae_lambda': 0.9610657932036595}. Best is trial 0 with value: 2000.0.
[I 2022-04-17 18:16:44,978] Trial 1 finished with value: 0.0 and parameters: {'gamma': 0.9845244727551129, 'learning_rate': 6.08520759249335e-05, 'gae_lambda': 0.9578639279396778}. Best is trial 0 with value: 2000.0.
(0,
{'gamma': 0.9853963563622253,
'learning_rate': 1.5078178326383642e-05,
'gae_lambda': 0.9610657932036595})
# Render the diagnostic figures for the movement-capture study inline.
plots = plot_study(study)
for figure in plots:
    figure.show("notebook", config=PLOTLY_CONFIG)